From 4c83085305433665a0ba83c5a1ede19d20cea2af Mon Sep 17 00:00:00 2001 From: "lynx@idefix.cl.cam.ac.uk" Date: Fri, 20 Dec 2002 11:54:11 +0000 Subject: [PATCH] bitkeeper revision 1.7.3.41 (3e0304e3ruV7OqrhZfYs29ld8-OyuA) Keir's & a bit of mine get_unmapped_area fixes --- .../arch/xeno/drivers/dom0/dom0_memory.c | 24 +++--- .../arch/xeno/mm/get_unmapped_area.c | 82 ++++++++++++++++--- .../include/asm-xeno/mmu.h | 3 +- 3 files changed, 88 insertions(+), 21 deletions(-) diff --git a/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c index 85ccd0ba9a..82bdb4f07f 100644 --- a/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c @@ -20,18 +20,14 @@ #define MAP_CONT 0 #define MAP_DISCONT 1 +extern struct list_head * find_direct(struct list_head *, unsigned long); + /* now, this is grimm, kmalloc seems to have problems allocating small mem * blocks, so i have decided to use fixed (a bit) larger blocks... this needs * to be traced down but no time now. */ #define KMALLOC_SIZE 128 -/* - * maps a range of physical memory into the requested pages. the old - * mappings are removed. any references to nonexistent pages results - * in null mappings (currently treated as "copy-on-access") - */ - /* bd240: functions below perform direct mapping to the real physical pages needed for * mapping various hypervisor specific structures needed in dom0 userspace by various * management applications such as domain builder etc. 
@@ -169,6 +165,7 @@ unsigned long direct_mmap(unsigned long phys_addr, unsigned long size, pgprot_t prot, int flag, int tot_pages) { direct_mmap_node_t * dmmap; + struct list_head * entry; unsigned long addr; int ret = 0; @@ -184,12 +181,19 @@ unsigned long direct_mmap(unsigned long phys_addr, unsigned long size, goto out; } - /* add node on the list of directly mapped areas */ + /* add node on the list of directly mapped areas, make sure the + * list remains sorted. + */ //dmmap = (direct_mmap_node_t *)kmalloc(GFP_KERNEL, sizeof(direct_mmap_node_t)); dmmap = (direct_mmap_node_t *)kmalloc(GFP_KERNEL, KMALLOC_SIZE); - - dmmap->addr = addr; - list_add(&dmmap->list, ¤t->mm->context.direct_list); + dmmap->vm_start = addr; + dmmap->vm_end = addr + size; + entry = find_direct(¤t->mm->context.direct_list, addr); + if(entry != ¤t->mm->context.direct_list){ + list_add_tail(&dmmap->list, entry); + } else { + list_add(&dmmap->list, ¤t->mm->context.direct_list); + } /* and perform the mapping */ if(flag == MAP_DISCONT){ diff --git a/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c b/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c index 2b17754e0e..54a4649d76 100644 --- a/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c +++ b/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c @@ -14,15 +14,13 @@ #include #include +/* static int direct_mapped(unsigned long addr) { direct_mmap_node_t * node; struct list_head * curr; struct list_head * direct_list = ¤t->mm->context.direct_list; - /* now, this loop is going to make things slow, maybe should think - * of a better way to implement it, maybe without list_head - */ curr = direct_list->next; while(curr != direct_list){ node = list_entry(curr, direct_mmap_node_t, list); @@ -36,7 +34,8 @@ static int direct_mapped(unsigned long addr) return 1; } - +*/ +/* unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct 
vm_area_struct *vma; @@ -54,15 +53,9 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = PAGE_ALIGN(TASK_UNMAPPED_BASE); for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { - /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr) return -ENOMEM; - /* here we check whether the vma is big enough and we also check - * whether it has already been direct mapped, in which case it - * is not available. this is the only difference to generic - * arch_get_unmapped_area. - */ if(current->pid > 100){ printk(KERN_ALERT "bd240 debug: gua: vm addr found %lx\n", addr); printk(KERN_ALERT "bd240 debug: gua: first condition %d, %lx, %lx\n",vma, addr + len, vma->vm_start); @@ -74,3 +67,72 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = vma->vm_end; } } +*/ +struct list_head *find_direct(struct list_head *list, unsigned long addr) +{ + direct_mmap_node_t * node; + struct list_head * curr; + struct list_head * direct_list = list; + + for ( curr = direct_list->next; curr != direct_list; curr = curr->next ) + { + node = list_entry(curr, direct_mmap_node_t, list); + if( node->vm_start > addr ) break; + } + + return curr; +} + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long +addr, unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct vm_area_struct *vma; + direct_mmap_node_t * node; + struct list_head * curr; + struct list_head * direct_list = &current->mm->context.direct_list; + + if (len > TASK_SIZE) + return -ENOMEM; + + if ( addr ) + { + addr = PAGE_ALIGN(addr); + vma = find_vma(current->mm, addr); + curr = find_direct(direct_list, addr); + node = list_entry(curr, direct_mmap_node_t, list); + if ( (TASK_SIZE - len >= addr) && + (!vma || addr + len <= vma->vm_start) && + ((curr == direct_list) || addr + len <= node->vm_start) ) + return addr; + } + + addr = PAGE_ALIGN(TASK_UNMAPPED_BASE); + + + /* Find first VMA and direct_map nodes with vm_start > addr */ + vma = find_vma(current->mm, addr); + curr = find_direct(direct_list, addr); 
+ node = list_entry(curr, direct_mmap_node_t, list); + + for ( ; ; ) + { + if ( TASK_SIZE - len < addr ) return -ENOMEM; + + if ( vma && (vma->vm_start < node->vm_start) ) + { + /* Do we fit before VMA node? */ + if ( addr + len <= vma->vm_start ) return addr; + addr = vma->vm_end; + vma = vma->vm_next; + } + else if ( curr != direct_list ) + { + /* Do we fit before direct_map node? */ + if ( addr + len <= node->vm_start) return addr; + addr = node->vm_end; + curr = curr->next; + node = list_entry(curr, direct_mmap_node_t, list); + } + else + { + /* Reached end of VMA and direct_map lists */ + return addr; + } + } +} diff --git a/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h b/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h index 588e66c0b9..6a47b34c41 100644 --- a/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h +++ b/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h @@ -7,7 +7,8 @@ /* describes dirrectly mapped vma nodes */ typedef struct { struct list_head list; - unsigned long addr; + unsigned long vm_start; + unsigned long vm_end; } direct_mmap_node_t; /* -- 2.30.2